#include <asm/hvm/svm/amd-iommu-proto.h>
#define INTREMAP_TABLE_ORDER 1
-DEFINE_SPINLOCK(int_remap_table_lock);
+static DEFINE_SPINLOCK(int_remap_table_lock);
void *int_remap_table = NULL;
static u8 *get_intremap_entry(u8 vector, u8 dm)
int __init amd_iommu_setup_intremap_table(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&int_remap_table_lock, flags);
-
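+ /* __init code runs before any concurrent callers exist, so no locking is needed here. */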
if ( int_remap_table == NULL )
{
int_remap_table = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
if ( int_remap_table == NULL )
- {
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
return -ENOMEM;
- }
memset(int_remap_table, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
}
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
return 0;
}
int __init deallocate_intremap_table(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&int_remap_table_lock, flags);
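+ /* Likewise __init-only; no locking is needed on this teardown path. */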
if ( int_remap_table )
{
__free_amd_iommu_tables(int_remap_table, INTREMAP_TABLE_ORDER);
int_remap_table = NULL;
}
- spin_unlock_irqrestore(&int_remap_table_lock, flags);
return 0;
}
int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
u64 iommu_l2e;
- unsigned long flags;
struct hvm_iommu *hd = domain_hvm_iommu(d);
int iw = IOMMU_IO_WRITE_ENABLED;
int ir = IOMMU_IO_READ_ENABLED;
BUG_ON( !hd->root_table );
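+ /* mapping_lock is never taken in interrupt context, so the IRQ-safe lock variants are unneeded. */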
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
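+ /* An unsynchronized HVM p2m is rebuilt in full by amd_iommu_sync_p2m(), so skip piecemeal updates. */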
if ( is_hvm_domain(d) && !hd->p2m_synchronized )
goto out;
iommu_l2e = iommu_l2e_from_pfn(hd->root_table, hd->paging_mode, gfn);
if ( iommu_l2e == 0 )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
return -EFAULT;
}
set_iommu_l1e_present(iommu_l2e, gfn, (u64)mfn << PAGE_SHIFT, iw, ir);
out:
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
BUG_ON( !hd->root_table );
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
if ( is_hvm_domain(d) && !hd->p2m_synchronized )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
if ( iommu_l2e == 0 )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
return -EFAULT;
}
/* mark PTE as 'page not present' */
clear_iommu_l1e_present(iommu_l2e, gfn);
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
/* send INVALIDATE_IOMMU_PAGES command */
for_each_amd_iommu ( iommu )
int amd_iommu_reserve_domain_unity_map(
 struct domain *domain, unsigned long phys_addr,
 unsigned long size, int iw, int ir)
{
u64 iommu_l2e;
- unsigned long flags, npages, i;
+ unsigned long npages, i;
struct hvm_iommu *hd = domain_hvm_iommu(domain);
npages = region_to_pages(phys_addr, size);
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
for ( i = 0; i < npages; ++i )
{
iommu_l2e = iommu_l2e_from_pfn(
 hd->root_table, hd->paging_mode, phys_addr >> PAGE_SHIFT);
if ( iommu_l2e == 0 )
{
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry phys_addr = %lx\n",
phys_addr);
return -EFAULT;
}
phys_addr += PAGE_SIZE;
}
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
int amd_iommu_sync_p2m(struct domain *d)
{
- unsigned long mfn, gfn, flags;
+ unsigned long mfn, gfn;
u64 iommu_l2e;
struct page_info *page;
struct hvm_iommu *hd;
hd = domain_hvm_iommu(d);
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
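+ /* Re-check the flag under the lock so only one caller performs the sync. */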
if ( hd->p2m_synchronized )
goto out;
if ( iommu_l2e == 0 )
{
spin_unlock(&d->page_alloc_lock);
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
amd_iov_error("Invalid IO pagetable entry gfn = %lx\n", gfn);
return -EFAULT;
}
hd->p2m_synchronized = 1;
out:
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
+ spin_unlock(&hd->mapping_lock);
return 0;
}
static int allocate_domain_resources(struct hvm_iommu *hd)
{
/* allocate root table */
- unsigned long flags;
-
- spin_lock_irqsave(&hd->mapping_lock, flags);
+ spin_lock(&hd->mapping_lock);
if ( !hd->root_table )
{
hd->root_table = alloc_amd_iommu_pgtable();
if ( !hd->root_table )
- goto error_out;
+ {
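+ /* Drop the lock before returning the failure to the caller. */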
+ spin_unlock(&hd->mapping_lock);
+ return -ENOMEM;
+ }
}
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
-
+ spin_unlock(&hd->mapping_lock);
return 0;
-
- error_out:
- spin_unlock_irqrestore(&hd->mapping_lock, flags);
- return -ENOMEM;
}
static int get_paging_mode(unsigned long entries)
bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
source->domain_id, target->domain_id);
- spin_unlock(&pcidevs_lock);
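+ /* pcidevs_lock is acquired and released by the caller; it must not be dropped here. */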
return 0;
}
static void deallocate_iommu_page_tables(struct domain *d)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
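+ /* Hold mapping_lock so table teardown cannot race with the map/unmap paths. */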
+ spin_lock(&hd->mapping_lock);
if ( hd->root_table )
{
deallocate_next_page_table(hd->root_table, hd->paging_mode);
hd->root_table = NULL;
}
+ spin_unlock(&hd->mapping_lock);
}